# import the necessary packages
import os
# initialize the path to the *original* input directory of images
ORIG_INPUT_DATASET = "Food-5K"
# initialize the base path to the *new* directory that will contain
# our images after computing the training and testing split
BASE_PATH = "datasets/images/rps"
# define the names of the training, testing, and validation
# directories (the "val" directory feeds the held-out CSV split,
# while the "test" directory is reserved for the visual
# predictions at the end)
TRAIN = "train"
TEST = "val"
VAL = "test"
# initialize the list of class label names
CLASSES = ["rock", "paper", "scissors"]
# set the batch size
BATCH_SIZE = 4
# initialize the label encoder file path and the output directory to
# where the extracted features (in CSV file format) will be stored
LE_PATH = os.path.sep.join(["rpsoutput", "le.cpickle"])
BASE_CSV_PATH = "rpsoutput"
# set the path to the serialized model after training
MODEL_PATH = os.path.sep.join(["rpsoutput", "model.cpickle"])
# import the necessary packages
from imutils import paths
import shutil
import os

# loop over the data splits
for split in (TRAIN, TEST, VAL):
    # grab all image paths in the current split
    print("[INFO] processing '{} split'...".format(split))
    p = os.path.sep.join([ORIG_INPUT_DATASET, split])
    imagePaths = list(paths.list_images(p))

    # loop over the image paths
    for imagePath in imagePaths:
        # extract the class label from the filename
        filename = imagePath.split(os.path.sep)[-1]
        label = CLASSES[int(filename.split("_")[0])]

        # construct the path to the output directory
        dirPath = os.path.sep.join([BASE_PATH, split, label])

        # if the output directory does not exist, create it
        if not os.path.exists(dirPath):
            os.makedirs(dirPath)

        # construct the path to the output image file and copy it
        p = os.path.sep.join([dirPath, filename])
        shutil.copy2(imagePath, p)
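Once the copy loop finishes, it is worth confirming that the new directory tree actually contains images. The snippet below is a quick sanity check of my own, not part of the original scripts; it only assumes the constants defined in the configuration cell above:

# sanity check (my addition): count how many images landed in each
# split/class directory created by the build step
for split in (TRAIN, TEST, VAL):
    for label in CLASSES:
        dirPath = os.path.sep.join([BASE_PATH, split, label])
        total = len(list(paths.list_images(dirPath)))
        print("[INFO] {}/{}: {} images".format(split, label, total))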
# import the necessary packages
from sklearn.preprocessing import LabelEncoder
from tensorflow.keras.applications import VGG16
from tensorflow.keras.applications.vgg16 import preprocess_input
from tensorflow.keras.preprocessing.image import img_to_array
from tensorflow.keras.preprocessing.image import load_img
from imutils import paths
import numpy as np
import pickle
import random
import os
# load the VGG16 network and initialize the label encoder
print("[INFO] loading network...")
model = VGG16(weights="imagenet", include_top=False)
le = None
# loop over the data splits
for split in (TRAIN, TEST):
    # grab all image paths in the current split
    print("[INFO] processing '{} split'...".format(split))
    p = os.path.sep.join([BASE_PATH, split])
    imagePaths = list(paths.list_images(p))

    # extract the class labels from the file paths (shuffling is
    # left disabled so rows are written in directory order)
    #random.shuffle(imagePaths)
    labels = [p.split(os.path.sep)[-2] for p in imagePaths]

    # if the label encoder is None, create it
    if le is None:
        le = LabelEncoder()
        le.fit(labels)

    # open the output CSV file for writing
    csvPath = os.path.sep.join([BASE_CSV_PATH, "{}.csv".format(split)])
    csv = open(csvPath, "w")

    # loop over the images in batches
    for (b, i) in enumerate(range(0, len(imagePaths), BATCH_SIZE)):
        # extract the batch of images and labels, then initialize the
        # list of actual images that will be passed through the network
        # for feature extraction
        print("[INFO] processing batch {}/{}".format(b + 1,
            int(np.ceil(len(imagePaths) / float(BATCH_SIZE)))))
        batchPaths = imagePaths[i:i + BATCH_SIZE]
        batchLabels = le.transform(labels[i:i + BATCH_SIZE])
        batchImages = []

        # loop over the images and labels in the current batch
        for imagePath in batchPaths:
            # load the input image using the Keras helper utility
            # while ensuring the image is resized to 224x224 pixels
            image = load_img(imagePath, target_size=(224, 224))
            image = img_to_array(image)

            # preprocess the image by (1) expanding the dimensions and
            # (2) subtracting the mean RGB pixel intensity from the
            # ImageNet dataset
            image = np.expand_dims(image, axis=0)
            image = preprocess_input(image)

            # add the image to the batch
            batchImages.append(image)

        # pass the images through the network and use the outputs as
        # our actual features, then reshape the features into a
        # flattened volume
        batchImages = np.vstack(batchImages)
        features = model.predict(batchImages, batch_size=BATCH_SIZE)
        features = features.reshape((features.shape[0], 7 * 7 * 512))

        # loop over the class labels and extracted features
        for (label, vec) in zip(batchLabels, features):
            # construct a row that consists of the class label and
            # the extracted features
            vec = ",".join([str(v) for v in vec])
            csv.write("{},{}\n".format(label, vec))

    # close the CSV file
    csv.close()

# serialize the label encoder to disk
f = open(LE_PATH, "wb")
f.write(pickle.dumps(le))
f.close()
2024-06-12 13:09:00.027397: I tensorflow/core/platform/cpu_feature_guard.cc:210] This TensorFlow binary is optimized to use available CPU instructions in performance-critical operations.
To enable the following instructions: AVX2 FMA, in other operations, rebuild TensorFlow with the appropriate compiler flags.
2024-06-12 13:09:01.839505: I external/local_xla/xla/stream_executor/cuda/cuda_executor.cc:998] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355
...
2024-06-12 13:09:01.894523: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1928] Created device /job:localhost/replica:0/task:0/device:GPU:0 with 3503 MB memory: -> device: 0, name: NVIDIA GeForce GTX 960M, pci bus id: 0000:01:00.0, compute capability: 5.0
WARNING: All log messages before absl::InitializeLog() is called are written to STDERR
I0000 00:00:1718197742.641173 90 service.cc:145] XLA service 0x7871e8004be0 initialized for platform CUDA (this does not guarantee that XLA will be used). Devices:
I0000 00:00:1718197742.641208 90 service.cc:153] StreamExecutor device (0): NVIDIA GeForce GTX 960M, Compute Capability 5.0
2024-06-12 13:09:02.653166: I tensorflow/compiler/mlir/tensorflow/utils/dump_mlir_util.cc:268] disabling MLIR crash reproducer, set env var `MLIR_CRASH_REPRODUCER_DIRECTORY` to enable.
2024-06-12 13:09:02.746601: I external/local_xla/xla/stream_executor/cuda/cuda_dnn.cc:465] Loaded cuDNN version 8906
I0000 00:00:1718197749.379902 90 device_compiler.h:188] Compiled cluster using XLA! This line is logged at most once for the lifetime of the process.
[INFO] loading network...
[INFO] processing 'train split'...
[INFO] processing batch 1/328
1/1 ━━━━━━━━━━━━━━━━━━━━ 7s 7s/step
[INFO] processing batch 2/328
1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 25ms/step
[INFO] processing batch 3/328
1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 21ms/step
...
[INFO] processing batch 38/328
KeyboardInterrupt:
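The run above was interrupted at batch 38, but once feature extraction completes, the CSVs can be checked before training. This is a small sketch of my own (assuming train.csv exists under BASE_CSV_PATH); each row should hold one encoded label followed by 7 x 7 x 512 = 25,088 feature values:

# sanity check (my addition): read the first row of the training CSV
# and verify its width matches the flattened 7x7x512 volume
with open(os.path.sep.join([BASE_CSV_PATH, "{}.csv".format(TRAIN)])) as fh:
    row = fh.readline().strip().split(",")
print("[INFO] label: {}, feature count: {}".format(row[0], len(row) - 1))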
# import the necessary packages
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import classification_report
import numpy as np
import pickle
import os
def load_data_split(splitPath):
    # initialize the data and labels
    data = []
    labels = []

    # loop over the rows in the data split file
    for row in open(splitPath):
        # extract the class label and features from the row
        row = row.strip().split(",")
        label = row[0]
        features = np.array(row[1:], dtype="float")

        # update the data and label lists
        data.append(features)
        labels.append(label)

    # convert the data and labels to NumPy arrays
    data = np.array(data)
    labels = np.array(labels)

    # return a tuple of the data and labels
    return (data, labels)
# derive the paths to the training and testing CSV files
trainingPath = os.path.sep.join([BASE_CSV_PATH,
    "{}.csv".format(TRAIN)])
testingPath = os.path.sep.join([BASE_CSV_PATH,
    "{}.csv".format(TEST)])
# load the data from disk
print("[INFO] loading data...")
(trainX, trainY) = load_data_split(trainingPath)
print(trainX.shape)
(testX, testY) = load_data_split(testingPath)
# load the label encoder from disk
le = pickle.loads(open(LE_PATH, "rb").read())
# train the model
print("[INFO] training model...")
print(testX)
modelLogReg = LogisticRegression(solver="lbfgs", multi_class="auto",
max_iter=150)
modelLogReg.fit(trainX, trainY)
# evaluate the model
print("[INFO] evaluating...")
preds = modelLogReg.predict(testX)
print(classification_report(testY, preds, target_names=le.classes_))
# serialize the model to disk
print("[INFO] saving model...")
f = open(MODEL_PATH, "wb")
f.write(pickle.dumps(modelLogReg))
f.close()

# import the necessary packages
import os
import numpy as np
#from google.colab import files
from tensorflow.keras.preprocessing import image
from sklearn.metrics import multilabel_confusion_matrix
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
%matplotlib inline
testdir = "datasets/images/rps/test/"
# grab the image paths in the test split (it contains one
# subdirectory per class) and keep at most 16 for the 4x4 grid
uploaded = list(paths.list_images(testdir))[:16]
fig = plt.figure(figsize= (10, 10))
#fig.tight_layout(pad=5.0)
plt.subplots_adjust(left=0.1, bottom=0.1, right=0.9, top=0.9,
    wspace=0.4, hspace=0.4)
for i in range(len(uploaded)):
    # load the image at the network's input resolution and plot it
    path = uploaded[i]
    img = image.load_img(path, target_size=(224, 224))
    ax = fig.add_subplot(4, 4, i + 1)
    ax.imshow(img)

    # preprocess the image exactly as during feature extraction
    images = img_to_array(img)
    images = np.expand_dims(images, axis=0)
    images = preprocess_input(images)

    # extract CNN features and classify them with the trained
    # Logistic Regression model
    features = model.predict(images, batch_size=BATCH_SIZE)
    features = features.reshape((features.shape[0], 7 * 7 * 512))
    pred = modelLogReg.predict(features)
    ax.title.set_text(le.classes_[int(pred[0])])
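The multilabel_confusion_matrix import above is never actually exercised; the sketch below (my addition, assuming testX, testY, and modelLogReg from the training cell are still in memory) puts it to work to summarize per-class errors on the held-out split:

# my addition: one 2x2 matrix per class, laid out as [[TN, FP], [FN, TP]];
# rows line up with le.classes_ since the encoded labels sort the same way
mcm = multilabel_confusion_matrix(testY, modelLogReg.predict(testX))
for (name, m) in zip(le.classes_, mcm):
    print("[INFO] {}:\n{}".format(name, m))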